    HYPERCALL(tmem_op)
};
+/* PVH 32bitfixme. */
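+/*
+ * Hypercalls available to PVH guests.  Any hypercall without an entry
+ * here is NULL, which the availability check in hvm_do_hypercall()
+ * turns into -ENOSYS.
+ */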
+static hvm_hypercall_t *const pvh_hypercall64_table[NR_hypercalls] = {
+    HYPERCALL(platform_op),
+    HYPERCALL(memory_op),
+    HYPERCALL(xen_version),
+    HYPERCALL(console_io),
+    [ __HYPERVISOR_grant_table_op ] = (hvm_hypercall_t *)hvm_grant_table_op,
+    HYPERCALL(vcpu_op),
+    HYPERCALL(mmuext_op),
+    HYPERCALL(xsm_op),
+    HYPERCALL(sched_op),
+    HYPERCALL(event_channel_op),
+    [ __HYPERVISOR_physdev_op ] = (hvm_hypercall_t *)hvm_physdev_op,
+    HYPERCALL(hvm_op),
+    HYPERCALL(sysctl),
+    HYPERCALL(domctl)
+};
+
int hvm_do_hypercall(struct cpu_user_regs *regs)
{
    struct vcpu *curr = current;
    int mode = hvm_guest_x86_mode(curr);
    uint32_t eax = regs->eax;

    if ( (eax & 0x80000000) && is_viridian_domain(curr->domain) )
        return viridian_hypercall(regs);
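+    /*
+     * PVH guests are 64-bit only for now (32bitfixme), so the
+     * availability check always consults the PVH 64-bit table.
+     */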
-    if ( (eax >= NR_hypercalls) || !hvm_hypercall32_table[eax] )
+    if ( (eax >= NR_hypercalls) ||
+         (is_pvh_vcpu(curr) ? !pvh_hypercall64_table[eax]
+                            : !hvm_hypercall32_table[eax]) )
    {
        regs->eax = -ENOSYS;
        return HVM_HCALL_completed;
    }

    if ( mode == 8 )
    {
        HVM_DBG_LOG(DBG_LEVEL_HCALL, "hcall%u(%lx, %lx, %lx, %lx, %lx, %lx)",
                    eax, regs->rdi, regs->rsi, regs->rdx,
                    regs->r10, regs->r8, regs->r9);
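
        /* Mark the vcpu as handling a 64-bit hypercall for its duration. */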
        curr->arch.hvm_vcpu.hcall_64bit = 1;
-        regs->rax = hvm_hypercall64_table[eax](regs->rdi,
-                                               regs->rsi,
-                                               regs->rdx,
-                                               regs->r10,
-                                               regs->r8,
-                                               regs->r9);
+        if ( is_pvh_vcpu(curr) )
+            regs->rax = pvh_hypercall64_table[eax](regs->rdi, regs->rsi,
+                                                   regs->rdx, regs->r10,
+                                                   regs->r8, regs->r9);
+        else
+            regs->rax = hvm_hypercall64_table[eax](regs->rdi, regs->rsi,
+                                                   regs->rdx, regs->r10,
+                                                   regs->r8, regs->r9);
        curr->arch.hvm_vcpu.hcall_64bit = 0;
    }
    else
    {
+        ASSERT(!is_pvh_vcpu(curr)); /* PVH 32bitfixme. */
+
        HVM_DBG_LOG(DBG_LEVEL_HCALL, "hcall%u(%x, %x, %x, %x, %x, %x)", eax,
                    (uint32_t)regs->ebx, (uint32_t)regs->ecx,
                    (uint32_t)regs->edx, (uint32_t)regs->esi,
                    (uint32_t)regs->edi, (uint32_t)regs->ebp);
            return -ESRCH;

        rc = -EINVAL;
-        if ( !is_hvm_domain(d) )
+        if ( !has_hvm_container_domain(d) )
+            goto param_fail;
+
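+        /* PVH guests currently support only HVM_PARAM_CALLBACK_IRQ. */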
+        if ( is_pvh_domain(d)
+             && (a.index != HVM_PARAM_CALLBACK_IRQ) )
             goto param_fail;

        rc = xsm_hvm_param(XSM_TARGET, d, op);
    case PHYSDEVOP_set_iopl: {
        struct physdev_set_iopl set_iopl;
+
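+        /* I/O privilege level changes are not supported for PVH vcpus. */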
+        ret = -ENOSYS;
+        if ( is_pvh_vcpu(current) )
+            break;
+
        ret = -EFAULT;
        if ( copy_from_guest(&set_iopl, arg, 1) != 0 )
            break;
    case PHYSDEVOP_set_iobitmap: {
        struct physdev_set_iobitmap set_iobitmap;
+
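+        /* Likewise, I/O bitmaps are not supported for PVH vcpus. */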
+        ret = -ENOSYS;
+        if ( is_pvh_vcpu(current) )
+            break;
+
        ret = -EFAULT;
        if ( copy_from_guest(&set_iobitmap, arg, 1) != 0 )
            break;
            if ( current->domain == dom0 )
                fi.submap |= 1U << XENFEAT_dom0;
#ifdef CONFIG_X86
-            if ( is_pv_vcpu(current) )
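+            /* The features we advertise now depend on the guest type. */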
+            switch ( d->guest_type )
+            {
+            case guest_type_pv:
                fi.submap |= (1U << XENFEAT_mmu_pt_update_preserve_ad) |
                             (1U << XENFEAT_highmem_assist) |
                             (1U << XENFEAT_gnttab_map_avail_bits);
-            else
+                break;
+            case guest_type_pvh:
+                fi.submap |= (1U << XENFEAT_hvm_safe_pvclock) |
+                             (1U << XENFEAT_supervisor_mode_kernel) |
+                             (1U << XENFEAT_hvm_callback_vector);
+                break;
+            case guest_type_hvm:
                fi.submap |= (1U << XENFEAT_hvm_safe_pvclock) |
                             (1U << XENFEAT_hvm_callback_vector) |
                             (1U << XENFEAT_hvm_pirqs);
+                break;
+            }
#endif
            break;
        default: